RNN_Example Tutorial bugs since TF 1.0

by: Jamy4000, 7 years ago


Hey guys!

I've been spending a long time trying to track down an error I get when running the code from the RNN tutorial (part 11 of the Deep Learning series). After modifying some lines so it would work with the new TF version, I'm stuck with this:


Traceback (most recent call last):
  File "RNN_Example.py", line 70, in <module>
    train_neural_network(x)
  File "RNN_Example.py", line 41, in train_neural_network
    prediction = recurrent_neural_network(x)
  File "RNN_Example.py", line 31, in recurrent_neural_network
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)
  File "/home/jamy4000/.local/lib/python3.5/site-packages/tensorflow/contrib/rnn/python/ops/core_rnn.py", line 138, in static_rnn
    "shape inference, but saw value None." % i)
ValueError: Input size (dimension 0 of inputs) must be accessible via shape inference, but saw value None.
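
From reading core_rnn.py, the check that fails seems to be static_rnn verifying that every dimension of each per-timestep input, apart from the batch dimension, is statically known: it expects x to be a list of n_chunks 2-D tensors, each shaped [batch_size, chunk_size]. A minimal sketch of the shape pipeline I believe the tutorial intends (assuming the 28-pixel rows of the 28x28 MNIST images are the chunks, as in the tutorial):

import tensorflow as tf
from tensorflow.contrib import rnn

n_chunks, chunk_size, rnn_size = 28, 28, 128

x = tf.placeholder('float', [None, n_chunks, chunk_size])  # [batch, 28, 28]
xt = tf.transpose(x, [1, 0, 2])         # [28, batch, 28] - time-major
xr = tf.reshape(xt, [-1, chunk_size])   # [28*batch, 28]  - fold time into batch
xs = tf.split(xr, n_chunks, 0)          # list of 28 tensors, each [batch, 28]

lstm_cell = rnn.BasicLSTMCell(rnn_size)
# shape inference succeeds here: each input is [None, 28], so the
# non-batch dimension (28) is statically known
outputs, states = rnn.static_rnn(lstm_cell, xs, dtype=tf.float32)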


If anyone has any idea, that would be awesome!

Here's my code, btw:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib import rnn #, rnn_cell

mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)

hm_epochs = 10
n_classes = 10
batch_size = 128
chunk_size = 28
n_chunks = 28
rnn_size = 128

x = tf.placeholder('float', [None, n_chunks, chunk_size])
y = tf.placeholder('float')

def recurrent_neural_network(x):

    layer = {'weights': tf.Variable(tf.random_normal([rnn_size, n_classes])),
             'biases': tf.Variable(tf.random_normal([n_classes]))}

    x = tf.transpose(x, [1, 0, 2])
    print(x)
    tf.reshape(x, [-1, chunk_size])
    print(x)
    x = tf.split(x, n_chunks, 0)

    lstm_cell = rnn.BasicLSTMCell(rnn_size)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype = tf.float32)

    output = tf.matmul(outputs[-1], layer['weights']) + layer['biases']

    return output


def train_neural_network(x):
    prediction = recurrent_neural_network(x)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    optimizer = tf.train.AdamOptimizer().minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)

                epoch_x = epoch_x.reshape(batch_size, n_chunks, chunk_size)

                _, c = sess.run([optimizer, cost], feed_dict= {x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        print('Accuracy', accuracy.eval({x: mnist.test.images.reshape((-1, n_chunks, chunk_size)), y: mnist.test.labels}))

train_neural_network(x)
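
Could the problem be that tf.reshape returns a new tensor rather than modifying x in place? Since I never assign its result back, x would still be 3-D when it reaches tf.split, so each split piece would come out shaped [1, None, 28] instead of [None, 28], and that None in the inferred input size looks like exactly what static_rnn is complaining about. The block with the assignment added would be (untested sketch):

    x = tf.transpose(x, [1, 0, 2])       # [n_chunks, batch, chunk_size]
    x = tf.reshape(x, [-1, chunk_size])  # assign the result back so x becomes 2-D
    x = tf.split(x, n_chunks, 0)         # n_chunks tensors of [batch, chunk_size]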


Thank you very much! :)


